dd2f3d
@@ -44,11 +44,13 @@
import org.apache.hadoop.hbase.util.Bytes;
  * long value), then you can pass in your own comparator instead.
  * <p>
  * You must also specify a family and qualifier.  Only the value of this column 
- * will be tested.  All other 
+ * will be tested.
  * <p>
- * To prevent the entire row from being emitted if this filter determines the
- * column does not pass (it should be filtered), wrap this filter with a
- * {@link SkipFilter}.
+ * To prevent the entire row from being emitted if the column is not found
+ * on a row, use {@link #setFilterIfMissing}.
+ * <p>
+ * Otherwise, if the column is found, the entire row will be emitted only if
+ * the value passes.  If the value fails, the row will be filtered out.
  * <p>
  * To filter based on the value of all scanned columns, use {@link ValueFilter}.
  */
@@ -59,7 +61,10 @@
public class SingleColumnValueFilter implements Filter {
   private byte [] columnQualifier; 
   private CompareOp compareOp;
   private WritableByteArrayComparable comparator;
-
+  private boolean foundColumn = false;
+  private boolean matchedColumn = false;
+  private boolean filterIfMissing = false;
+  
   /**
    * Writable constructor, do not use.
    */
@@ -109,13 +114,22 @@
public class SingleColumnValueFilter implements Filter {
   }
 
   public ReturnCode filterKeyValue(KeyValue keyValue) {
+    if(matchedColumn) {
+      // We already found and matched the single column, all keys now pass
+      return ReturnCode.INCLUDE;
+    } else if(foundColumn) {
+      // We found but did not match the single column, skip to next row
+      return ReturnCode.NEXT_ROW;
+    }
     if (!keyValue.matchingColumn(this.columnFamily, this.columnQualifier)) {
       return ReturnCode.INCLUDE;
     }
+    foundColumn = true;
     if (filterColumnValue(keyValue.getBuffer(),
         keyValue.getValueOffset(), keyValue.getValueLength())) {
       return ReturnCode.NEXT_ROW;
     }
+    matchedColumn = true;
     return ReturnCode.INCLUDE;
   }
 
@@ -147,10 +161,34 @@
public class SingleColumnValueFilter implements Filter {
   }
 
   public boolean filterRow() {
-    return false;
+    // If column was found, return false if it was matched, true if it was not
+    // If column not found, return true if we filter if missing, false if not
+    return foundColumn ? !matchedColumn : filterIfMissing;
   }
 
   public void reset() {
+    foundColumn = false;
+    matchedColumn = false;
+  }
+  
+  /**
+   * Get whether entire row should be filtered if column is not found.
+   * @return true if the entire row should be skipped when the column is not
+   * found, false if the row should be emitted anyway
+   */
+  public boolean getFilterIfMissing() {
+    return filterIfMissing;
+  }
+  
+  /**
+   * Set whether entire row should be filtered if column is not found.
+   * <p>
+   * If true, the entire row will be skipped if the column is not found.
+   * <p>
+   * If false, the row will pass if the column is not found.  This is the default.
+   */
+  public void setFilterIfMissing(boolean filterIfMissing) {
+    this.filterIfMissing = filterIfMissing;
   }
 
   public void readFields(final DataInput in) throws IOException {
@@ -165,6 +203,9 @@
public class SingleColumnValueFilter implements Filter {
     compareOp = CompareOp.valueOf(in.readUTF());
     comparator = (WritableByteArrayComparable) HbaseObjectWritable.readObject(in,
         null);
+    foundColumn = in.readBoolean();
+    matchedColumn = in.readBoolean();
+    filterIfMissing = in.readBoolean();
   }
 
   public void write(final DataOutput out) throws IOException {
@@ -173,5 +214,8 @@
public class SingleColumnValueFilter implements Filter {
     out.writeUTF(compareOp.name());
     HbaseObjectWritable.writeObject(out, comparator,
         WritableByteArrayComparable.class, null);
+    out.writeBoolean(foundColumn);
+    out.writeBoolean(matchedColumn);
+    out.writeBoolean(filterIfMissing);
   }
 }
\ No newline at end of file
